Merge latest xen-unstable into xen-ia64-unstable
author: djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Mon, 7 Nov 2005 17:13:38 +0000 (11:13 -0600)
committer: djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Mon, 7 Nov 2005 17:13:38 +0000 (11:13 -0600)
1  2 
linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre
linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c
linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
linux-2.6-xen-sparse/drivers/xen/console/console.c

index 2c81709555653b6644fa870a1f9dff5d90af7e9d,2c81709555653b6644fa870a1f9dff5d90af7e9d..b92bbc921f926488654427d70ef5fd9604d0481a
@@@ -48,10 -48,10 +48,10 @@@ cp arch/ia64/xen/drivers/evtchn_ia64.c 
  cp arch/ia64/xen/drivers/motherboard.c drivers/acpi/motherboard.c
  
  #still a few x86-ism's in various drivers/xen files, patch them
--cd drivers/xen
--if [ ! -e ia64.patch.semaphore ]
--then
--      cat ../../arch/ia64/xen/drivers/patches/* | patch -p1 -b
--fi
--touch ia64.patch.semaphore
--cd ../..
++#cd drivers/xen
++#if [ ! -e ia64.patch.semaphore ]
++#then
++#     cat ../../arch/ia64/xen/drivers/patches/* | patch -p1 -b
++#fi
++#touch ia64.patch.semaphore
++#cd ../..
index 2d1cde54e7a8bf52820ab528806eadb4ad7990b1,f8e5ebb61ed884d7c419c2912315cd4dce648e68..4c0a10541ba36a07d585efccee81453d509db6e5
@@@ -346,6 -360,6 +360,9 @@@ grant_ioctl(struct inode *inode, struc
        if ( hypercall.op != __HYPERVISOR_grant_table_op )
                return -ENOSYS;
  
++#ifdef __ia64__
++      ret = HYPERVISOR_grant_table_op(hypercall.arg[0], (void *)hypercall.arg[1], hypercall.arg[2]);
++#else
        /* hypercall-invoking asm taken from privcmd.c */
        __asm__ __volatile__ (
                "pushl %%ebx; pushl %%ecx; pushl %%edx; "
                TRAP_INSTR "; "
                "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
                : "=a" (ret) : "0" (&hypercall) : "memory" );
++#endif
  
        return ret;
  }
@@@ -423,8 -437,8 +441,13 @@@ gnttab_resume(void
        BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
        BUG_ON(setup.status != 0);
  
++#ifdef __ia64__
++      shared = __va(frames[0] << PAGE_SHIFT);
++      printk("grant table at %p\n", shared);
++#else
        for (i = 0; i < NR_GRANT_FRAMES; i++)
                set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
++#endif
  
        return 0;
  }
@@@ -450,7 -464,7 +473,9 @@@ gnttab_init(void
  
        BUG_ON(gnttab_resume());
  
++#ifndef __ia64__
        shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
++#endif
  
        for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
                gnttab_list[i] = i + 1;
index 3f9e90cfa8234e546b9448843ad575410f476e07,283fcd2ff68f8379835e870c01c20d0a9765ffd1..a88d8973323651b7564453362532a67860ad6c1b
  static unsigned long mmap_vstart;
  #define MMAP_PAGES                                            \
        (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
++#ifdef __ia64__
++static void *pending_vaddrs[MMAP_PAGES];
++#define MMAP_VADDR(_idx, _i) \
++      (unsigned long)(pending_vaddrs[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
++#else
  #define MMAP_VADDR(_req,_seg)                                         \
        (mmap_vstart +                                                  \
         ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
         ((_seg) * PAGE_SIZE))
++#endif
  
  /*
   * Each outstanding request that we've passed to the lower device layers has a 
@@@ -368,27 -370,27 +377,31 @@@ static void dispatch_rw_block_io(blkif_
                        map[i].flags |= GNTMAP_readonly;
        }
  
-       BUG_ON(HYPERVISOR_grant_table_op(
-               GNTTABOP_map_grant_ref, map, nseg));
+       ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+       BUG_ON(ret);
  
        for (i = 0; i < nseg; i++) {
-               if (unlikely(map[i].handle < 0)) {
-                       DPRINTK("invalid buffer -- could not remap it\n");
-                       fast_flush_area(pending_idx, nseg);
-                       goto bad_descriptor;
+               if (likely(map[i].handle >= 0)) {
+                       pending_handle(pending_idx, i) = map[i].handle;
++#ifdef __ia64__
++                      MMAP_VADDR(pending_idx,i) = gnttab_map_vaddr(map[i]);
++#else
+                       phys_to_machine_mapping[__pa(MMAP_VADDR(
+                               pending_idx, i)) >> PAGE_SHIFT] =
+                               FOREIGN_FRAME(map[i].dev_bus_addr>>PAGE_SHIFT);
++#endif
+                       fas        = req->frame_and_sects[i];
+                       seg[i].buf = map[i].dev_bus_addr | 
+                               (blkif_first_sect(fas) << 9);
+               } else {
+                       errors++;
                }
-               phys_to_machine_mapping[__pa(MMAP_VADDR(
-                       pending_idx, i)) >> PAGE_SHIFT] =
-                       FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-               pending_handle(pending_idx, i) = map[i].handle;
        }
  
-       for (i = 0; i < nseg; i++) {
-               fas         = req->frame_and_sects[i];
-               seg[i].buf  = map[i].dev_bus_addr | 
-                       (blkif_first_sect(fas) << 9);
+       if (errors) {
+               DPRINTK("invalid buffer -- could not remap it\n");
+               fast_flush_area(pending_idx, nseg);
+               goto bad_descriptor;
        }
  
        if (vbd_translate(&preq, blkif, operation) != 0) {
@@@ -494,15 -496,16 +507,32 @@@ static int __init blkif_init(void
  {
        int i;
        struct page *page;
+       int ret;
+       for (i = 0; i < MMAP_PAGES; i++)
+               pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
  
 +      if (xen_init() < 0)
 +              return -ENODEV;
 +
        blkif_interface_init();
  
++#ifdef __ia64__
++    {
++      extern unsigned long alloc_empty_foreign_map_page_range(unsigned long pages);
++      int i;
++
++      mmap_vstart =  alloc_empty_foreign_map_page_range(MMAP_PAGES);
++      printk("Allocated mmap_vstart: 0x%lx\n", mmap_vstart);
++      for(i = 0; i < MMAP_PAGES; i++)
++          pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
++      BUG_ON(mmap_vstart == NULL);
++    }
++#else
        page = balloon_alloc_empty_page_range(MMAP_PAGES);
        BUG_ON(page == NULL);
        mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
++#endif
  
        pending_cons = 0;
        pending_prod = MAX_PENDING_REQS;
index 6da5d9159c00052215d180ce459bd5dbde070e12,1a23f4495e1bb08bc313ddf05ece2aab6c67d62d..9c357d48d33ab8cfb880ae3203704f9c3a88ad9b
@@@ -771,9 -768,9 +771,15 @@@ static int __init xencons_init(void
  #endif
  
        if (xen_start_info->flags & SIF_INITDOMAIN) {
++#ifdef __ia64__
++              xencons_priv_irq = bind_virq_to_evtchn(VIRQ_CONSOLE);
++              bind_evtchn_to_irqhandler(xencons_priv_irq,
++                              xencons_priv_interrupt, 0, "console", NULL);
++#else
                xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
                (void)request_irq(xencons_priv_irq,
                                  xencons_priv_interrupt, 0, "console", NULL);
++#endif
        } else {
                xencons_ring_register_receiver(xencons_rx);
        }